from pyspark.sql.types import *
# Explicit schema for the CSV datasets: 100 feature columns x0..x99 plus the
# label column y. Every column is a nullable double except the three
# categorical string columns listed below.
_STRING_COLS = {18, 63, 69}
_fields = [
    StructField(f"x{i}", StringType() if i in _STRING_COLS else DoubleType(), True)
    for i in range(100)
]
_fields.append(StructField("y", DoubleType(), True))
schema = StructType(_fields)
# SparkSession is not provided by the `pyspark.sql.types` star-import, so it
# must be imported explicitly before use (the original script raised NameError
# on the very first Spark call).
from pyspark.sql import SparkSession
from pyspark.sql import SQLContext
from pyspark.ml.feature import VectorAssembler

# Builds (or reuses) a local SparkSession using all available cores.
sparkSession = (
    SparkSession.builder
    .master("local[*]")
    .appName("appName")
    .config("spark.sql.warehouse.dir", "./spark-warehouse")
    .getOrCreate()
)

# Loads the train/test CSVs with the explicit schema defined above.
# On Spark 1.6, read through SQLContext instead:
#   sqlContext = SQLContext(sc)
#   train = sqlContext.read.format("csv").schema(schema).option("header", "true").load("my_dataset_train.csv")
#   test = sqlContext.read.format("csv").schema(schema).option("header", "true").load("my_dataset_test.csv")
# TODO: hard-coded absolute user paths — parameterize for portability.
train = sparkSession.read.format("csv").schema(schema).option("header", "true").load("/Users/idownard/development/snape/my_dataset_train.csv")
test = sparkSession.read.format("csv").schema(schema).option("header", "true").load("/Users/idownard/development/snape/my_dataset_test.csv")
# Assembles the twelve product-usage columns x1..x12 into a single vector
# column named "features" for the clustering stage.
featureCols = ["x%d" % i for i in range(1, 13)]
assembler = VectorAssembler(inputCols=featureCols, outputCol="features")
trainData = assembler.transform(train)
testData = assembler.transform(test)
print("Assembled features:")
trainData.select("features").limit(5).show(truncate=False)
# Fits a k-means model with 4 clusters (one per customer persona) on the
# assembled feature vectors. maxIter=1 keeps this demo run fast.
from pyspark.ml.clustering import KMeans
kmeans = KMeans(k=4, featuresCol="features", maxIter=1)
model = kmeans.fit(trainData)
# Prints the centroid of each learned cluster.
centers = model.clusterCenters()
print("Cluster Centers: ")
for centroid in centers:
    print(centroid)
# Assigns each test row to a cluster and previews a few scored rows.
transformed = model.transform(testData)
transformed.selectExpr("abs(x2)","abs(x3)","prediction").limit(5).show(truncate=False)
# Counts cluster membership so each persona group can be sized.
grouped = transformed.selectExpr("abs(x1)", "prediction").groupBy("prediction")
predictions = grouped.agg({'prediction': 'count'}).orderBy("prediction")
predictions.show(truncate=False)
# Pulls the small aggregate down to the driver as a pandas DataFrame for plotting.
data = predictions.toPandas()
print(type(data))
print(data)
# Initializes the cufflinks charting bridge for offline (in-notebook) plotting.
import cufflinks as cf
import pandas as pd
cf.set_config_file(offline=True, world_readable=True)
# Pie chart: one slice per k-means cluster, sized by member count.
data.iplot(kind='pie', labels='prediction', values='count(prediction)', title="Persona Size")
import plotly
import plotly.graph_objs as go
# Injects plotly's JS into the notebook so offline iplot() renders inline.
plotly.offline.init_notebook_mode()
# plots heatmap
# Sums |x1|..|x12| per cluster so the heatmap shows total product usage per persona.
df = transformed.selectExpr("abs(x1)","abs(x2)","abs(x3)","abs(x4)","abs(x5)","abs(x6)","abs(x7)","abs(x8)","abs(x9)","abs(x10)","abs(x11)","abs(x12)","prediction").groupBy("prediction").agg(
{'abs(x1)': 'sum',
'abs(x2)': 'sum',
'abs(x3)': 'sum',
'abs(x4)': 'sum',
'abs(x5)': 'sum',
'abs(x6)': 'sum',
'abs(x7)': 'sum',
'abs(x8)': 'sum',
'abs(x9)': 'sum',
'abs(x10)': 'sum',
'abs(x11)': 'sum',
'abs(x12)': 'sum'}).orderBy("prediction")
# NOTE(review): Spark does not guarantee the output column order of a
# dict-based agg(), so the z columns may not line up with the x-axis labels
# below — selecting the sum columns in an explicit order would be safer.
zdata = df.toPandas()
# NOTE(review): zdata still contains the `prediction` column, and 14 x labels
# are supplied for at most 13 data columns (prediction + 12 sums). The
# label-to-column alignment needs to be verified before trusting this chart.
data = [go.Heatmap( z=zdata.values.tolist(),
# Four y labels match k=4 clusters from the k-means model above.
y=['Persona A', 'Persona B', 'Persona C', 'Persona D'],
x=['Debit Card',
'Personal Credit Card',
'Business Credit Card',
'Home Mortgage Loan',
'Auto Loan',
'Brokerage Account',
'Roth IRA',
'401k',
'Home Insurance',
'Automobile Insurance',
'Medical Insurance',
'Life Insurance',
'Cell Phone',
'Landline'
],
colorscale='Viridis')]
plotly.offline.iplot(data, filename='pandas-heatmap')
# Imports for the synthetic-dataset demos below.
from sklearn.datasets import make_classification, make_regression
# NOTE(review): `sklearn.externals.six` was removed in scikit-learn 0.23 and
# `six` is not referenced anywhere in this file — confirm it can be dropped.
from sklearn.externals import six
import pandas as pd
import numpy as np
# NOTE(review): argparse/json/re/os/sys appear unused in this view — likely
# leftovers from a copied script; verify before removing.
import argparse
import json
import re
import os
import sys
def rename_columns(df, prefix='x'):
    """
    Return a copy of the dataframe whose column labels are prefixed.

    The input dataframe is left untouched; each column label is converted to
    a string with ``prefix`` prepended (e.g. 0 -> 'x0').

    :param df: data frame we're operating on
    :param prefix: the prefix string
    :return: a new dataframe with renamed columns
    """
    # add_prefix returns a relabelled copy, matching the original
    # copy-then-assign behavior exactly.
    return df.add_prefix(prefix)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
# 2-D scatter demo: two generated classes plotted against two features.
# Clusters may overlap heavily if the data had more than 2 feature columns.
pts, labels = make_classification(n_samples=1000, n_classes=2, n_features=2,
                                  n_redundant=0, n_informative=2,
                                  scale=1000, n_clusters_per_class=1)
plt.scatter(pts[:, 0], pts[:, 1], marker='o', c=labels)
plt.show()
frame = pd.DataFrame(pts)
print(frame.head())
# NOTE(review): `plotly.plotly` moved to the separate `chart_studio` package in
# plotly>=4, and `py` is not used below — confirm before upgrading plotly.
import plotly.plotly as py
import pandas as pd
# 3-D scatter demo: three generated classes over three features.
# Clusters may overlap heavily if the data had more than 3 feature columns.
feats, targets = make_classification(n_samples=100, n_classes=3, n_features=3,
                                     n_redundant=0, n_informative=3,
                                     scale=1000, n_clusters_per_class=1)
# Rename the integer columns to x0..x2 and attach the class label as 'y'.
points = rename_columns(pd.DataFrame(feats))
points['y'] = targets
# Split into one dataframe per generated class/cluster.
cluster1 = points[points['y'] == 0]
cluster2 = points[points['y'] == 1]
cluster3 = points[points['y'] == 2]
# Raw coordinate arrays for each cluster, extracted once. `DataFrame.as_matrix()`
# was deprecated in pandas 0.23 and removed in 1.0 (it raised AttributeError
# here on modern pandas) — `to_numpy()` is the supported replacement.
pts1 = cluster1.to_numpy()
pts2 = cluster2.to_numpy()
pts3 = cluster3.to_numpy()

# One 3-D marker trace per persona cluster.
scatter1 = dict(
    mode="markers",
    name="Income",
    type="scatter3d",
    x=pts1[:, 0], y=pts1[:, 1], z=pts1[:, 2],
    marker=dict(size=2, color='green')
)
scatter2 = dict(
    mode="markers",
    name="Spending",
    type="scatter3d",
    x=pts2[:, 0], y=pts2[:, 1], z=pts2[:, 2],
    marker=dict(size=2, color='blue')
)
scatter3 = dict(
    mode="markers",
    name="Leverage",
    type="scatter3d",
    x=pts3[:, 0], y=pts3[:, 1], z=pts3[:, 2],
    marker=dict(size=2, color='red')
)
# Translucent mesh hulls outlining each cluster's shape (rebinds the
# cluster1/2/3 names from DataFrames to trace dicts, as the original did).
cluster1 = dict(
    alphahull=5,
    name="Income",
    opacity=.1,
    type="mesh3d",
    x=pts1[:, 0], y=pts1[:, 1], z=pts1[:, 2],
    color='green', showscale=True
)
cluster2 = dict(
    alphahull=5,
    name="Spending",
    opacity=.1,
    type="mesh3d",
    x=pts2[:, 0], y=pts2[:, 1], z=pts2[:, 2],
    color='blue', showscale=True
)
cluster3 = dict(
    alphahull=5,
    name="Leverage",
    opacity=.1,
    type="mesh3d",
    x=pts3[:, 0], y=pts3[:, 1], z=pts3[:, 2],
    color='red', showscale=True
)
layout = dict(
    title='Customer Segment Shapes',
    scene=dict(
        xaxis=dict(zeroline=False),
        yaxis=dict(zeroline=False),
        zaxis=dict(zeroline=False),
    )
)
fig = dict(data=[scatter1, scatter2, scatter3, cluster1, cluster2, cluster3], layout=layout)
# Use py.iplot() for IPython notebook
plotly.offline.iplot(fig, filename='mesh3d_sample')
# Gallery of synthetic datasets: six subplots showing how the generator
# parameters shape the data (mirrors scikit-learn's plot_random_dataset example).
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)

plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
# Fixed: n_informative was swapped with the next subplot — this panel's title
# promises ONE informative feature, so generate with n_informative=1.
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
                             n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)

plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
# Fixed: two informative features here, matching the title.
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
                             n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)

plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)

plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
          fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
                             n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)

plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)

plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()